From: Keir Fraser
Date: Mon, 6 Jul 2009 10:57:18 +0000 (+0100)
Subject: AMD IOMMU: Add suspend and resume support for amd iommu.
X-Git-Tag: archive/raspbian/4.8.0-1+rpi1~1^2~13640
X-Git-Url: https://dgit.raspbian.org/%22http:/www.example.com/cgi/%22https:/%22bookmarks://%22/%22http:/www.example.com/cgi/%22https:/%22bookmarks:/%22?a=commitdiff_plain;h=6e07e931945eecf9beb5fe9ebe70a7ba399e634d;p=xen.git

AMD IOMMU: Add suspend and resume support for amd iommu.

Signed-off-by: Wei Wang
---

diff --git a/xen/drivers/passthrough/amd/iommu_init.c b/xen/drivers/passthrough/amd/iommu_init.c
index cea3c91df8..cc1e81c132 100644
--- a/xen/drivers/passthrough/amd/iommu_init.c
+++ b/xen/drivers/passthrough/amd/iommu_init.c
@@ -716,3 +716,83 @@ error_out:
     }
     return -ENOMEM;
 }
+
+/*
+ * Quiesce a single IOMMU unit: mask its MSI, stop the command buffer
+ * and event log, and turn off DMA translation.  A no-op if the unit is
+ * not currently enabled.  All register writes happen under iommu->lock
+ * with interrupts disabled.
+ */
+static void disable_iommu(struct amd_iommu *iommu)
+{
+    unsigned long flags;
+
+    spin_lock_irqsave(&iommu->lock, flags);
+
+    /* Already disabled: nothing to tear down. */
+    if ( !iommu->enabled )
+    {
+        spin_unlock_irqrestore(&iommu->lock, flags);
+        return;
+    }
+
+    amd_iommu_msi_enable(iommu, IOMMU_CONTROL_DISABLED);
+    set_iommu_command_buffer_control(iommu, IOMMU_CONTROL_DISABLED);
+    set_iommu_event_log_control(iommu, IOMMU_CONTROL_DISABLED);
+    set_iommu_translation_control(iommu, IOMMU_CONTROL_DISABLED);
+
+    iommu->enabled = 0;
+
+    spin_unlock_irqrestore(&iommu->lock, flags);
+
+}
+
+/* Flush the cached IOMMU page-table entries of every domain in the system. */
+static void invalidate_all_domain_pages(void)
+{
+    struct domain *d;
+    for_each_domain( d )
+        invalidate_all_iommu_pages(d);
+}
+
+/*
+ * For every device listed in the IVRS table, invalidate the cached
+ * device table entry and interrupt remapping table on the IOMMU that
+ * owns the device, flushing that IOMMU's command buffer each time.
+ * Devices with no owning IOMMU (find_iommu_for_device() == NULL) are
+ * skipped.
+ */
+static void invalidate_all_devices(void)
+{
+    u16 bus, devfn, bdf, req_id;
+    unsigned long flags;
+    struct amd_iommu *iommu;
+
+    for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
+    {
+        /* bdf packs bus in the high byte, devfn in the low byte. */
+        bus = bdf >> 8;
+        devfn = bdf & 0xFF;
+        iommu = find_iommu_for_device(bus, devfn);
+        /* Invalidation commands are issued against the requestor id,
+         * which may differ from the raw bdf (e.g. behind a bridge). */
+        req_id = ivrs_mappings[bdf].dte_requestor_id;
+        if ( iommu )
+        {
+            spin_lock_irqsave(&iommu->lock, flags);
+            invalidate_dev_table_entry(iommu, req_id);
+            invalidate_interrupt_table(iommu, req_id);
+            flush_command_buffer(iommu);
+            spin_unlock_irqrestore(&iommu->lock, flags);
+        }
+    }
+}
+
+/*
+ * Host power-management hook: quiesce every IOMMU before suspend so no
+ * unit is left translating or raising interrupts across the power
+ * transition.
+ */
+void amd_iommu_suspend(void)
+{
+    struct amd_iommu *iommu;
+
+    for_each_amd_iommu ( iommu )
+        disable_iommu(iommu);
+}
+
+/*
+ * Host power-management hook: bring every IOMMU back up after resume,
+ * then flush all cached device table entries, interrupt tables and
+ * domain page-table entries, since firmware may have touched the
+ * hardware while Xen was suspended.
+ *
+ * NOTE(review): register state (device table base, command buffer base,
+ * etc.) is presumably re-programmed inside enable_iommu() — confirm, as
+ * it is not visible in this hunk.
+ */
+void amd_iommu_resume(void)
+{
+    struct amd_iommu *iommu;
+
+    for_each_amd_iommu ( iommu )
+    {
+        /*
+         * To make sure that iommus have not been touched
+         * before re-enablement
+         */
+        disable_iommu(iommu);
+        enable_iommu(iommu);
+    }
+
+    /* flush all cache entries after iommu re-enabled */
+    invalidate_all_devices();
+    invalidate_all_domain_pages();
+}